in Linux and libxc.
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
return err;
op.cmd = DOM0_MICROCODE;
- op.u.microcode.data = user_buffer;
+ SET_XEN_GUEST_HANDLE(op.u.microcode.data, user_buffer);
op.u.microcode.length = user_buffer_size;
err = HYPERVISOR_dom0_op(&op);
#ifdef CONFIG_XEN
map = alloc_bootmem_low_pages(PAGE_SIZE);
op.cmd = DOM0_PHYSICAL_MEMORY_MAP;
- op.u.physical_memory_map.memory_map = map;
+ SET_XEN_GUEST_HANDLE(op.u.physical_memory_map.memory_map, map);
op.u.physical_memory_map.max_map_entries =
PAGE_SIZE / sizeof(struct dom0_memory_map_entry);
BUG_ON(HYPERVISOR_dom0_op(&op));
pte_t *pte;
unsigned long frame, i, flags;
struct xen_memory_reservation reservation = {
- .extent_start = &frame,
.nr_extents = 1,
.extent_order = 0,
.domid = DOMID_SELF
};
+ SET_XEN_GUEST_HANDLE(reservation.extent_start, &frame);
/*
* Currently an auto-translated guest will not perform I/O, nor will
pte_t *pte;
unsigned long frame, i, flags;
struct xen_memory_reservation reservation = {
- .extent_start = &frame,
.nr_extents = 1,
.extent_order = 0,
.domid = DOMID_SELF
};
+ SET_XEN_GUEST_HANDLE(reservation.extent_start, &frame);
if (xen_feature(XENFEAT_auto_translated_physmap))
return;
map = alloc_bootmem_low_pages(PAGE_SIZE);
op.cmd = DOM0_PHYSICAL_MEMORY_MAP;
- op.u.physical_memory_map.memory_map = map;
+ SET_XEN_GUEST_HANDLE(op.u.physical_memory_map.memory_map, map);
op.u.physical_memory_map.max_map_entries =
PAGE_SIZE / sizeof(struct dom0_memory_map_entry);
BUG_ON(HYPERVISOR_dom0_op(&op));
page = balloon_next_page(page);
}
- reservation.extent_start = frame_list;
+ SET_XEN_GUEST_HANDLE(reservation.extent_start, frame_list);
reservation.nr_extents = nr_pages;
rc = HYPERVISOR_memory_op(
XENMEM_populate_physmap, &reservation);
if (rc < nr_pages) {
int ret;
/* We hit the Xen hard limit: reprobe. */
- reservation.extent_start = frame_list;
+ SET_XEN_GUEST_HANDLE(reservation.extent_start, frame_list);
reservation.nr_extents = rc;
ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
&reservation);
balloon_append(pfn_to_page(pfn));
}
- reservation.extent_start = frame_list;
+ SET_XEN_GUEST_HANDLE(reservation.extent_start, frame_list);
reservation.nr_extents = nr_pages;
ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
BUG_ON(ret != nr_pages);
unsigned long mfn = pte_mfn(*pte);
int ret;
struct xen_memory_reservation reservation = {
- .extent_start = &mfn,
.nr_extents = 1,
.extent_order = 0,
.domid = DOMID_SELF
};
+ SET_XEN_GUEST_HANDLE(reservation.extent_start, &mfn);
set_pte_at(&init_mm, addr, pte, __pte_ma(0));
set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY);
ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
setup.dom = DOMID_SELF;
setup.nr_frames = NR_GRANT_FRAMES;
- setup.frame_list = frames;
+ SET_XEN_GUEST_HANDLE(setup.frame_list, frames);
rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
if (rc == -ENOSYS)
{
unsigned long mfn = 0, flags;
struct xen_memory_reservation reservation = {
- .extent_start = mfn_list,
.nr_extents = MAX_MFN_ALLOC,
.extent_order = 0,
.domid = DOMID_SELF
};
+ SET_XEN_GUEST_HANDLE(reservation.extent_start, mfn_list);
spin_lock_irqsave(&mfn_lock, flags);
if ( unlikely(alloc_index == 0) )
alloc_index = HYPERVISOR_memory_op(
/* Tell the ballon driver what is going on. */
balloon_update_driver_allowance(i);
- reservation.extent_start = np->rx_pfn_array;
+ SET_XEN_GUEST_HANDLE(reservation.extent_start, np->rx_pfn_array);
reservation.nr_extents = i;
reservation.extent_order = 0;
reservation.address_bits = 0;
evtchn_port_t *ports, unsigned int nr_ports, u64 timeout)
{
struct sched_poll sched_poll = {
- .ports = ports,
.nr_ports = nr_ports,
.timeout = jiffies_to_st(timeout)
};
+ int rc;
- int rc = HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll);
+ SET_XEN_GUEST_HANDLE(sched_poll.ports, ports);
+ rc = HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll);
if (rc == -ENOSYS)
rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0);
op.cmd = DOM0_GETDOMAININFOLIST;
op.u.getdomaininfolist.first_domain = first_domain;
op.u.getdomaininfolist.max_domains = max_domains;
- op.u.getdomaininfolist.buffer = info;
+ SET_XEN_GUEST_HANDLE(op.u.getdomaininfolist.buffer, info);
if ( xc_dom0_op(xc_handle, &op) < 0 )
ret = -1;
op.cmd = DOM0_GETVCPUCONTEXT;
op.u.getvcpucontext.domain = (domid_t)domid;
op.u.getvcpucontext.vcpu = (uint16_t)vcpu;
- op.u.getvcpucontext.ctxt = ctxt;
+ SET_XEN_GUEST_HANDLE(op.u.getvcpucontext.ctxt, ctxt);
if ( (rc = mlock(ctxt, sizeof(*ctxt))) != 0 )
return rc;
op.cmd = DOM0_SHADOW_CONTROL;
op.u.shadow_control.domain = (domid_t)domid;
op.u.shadow_control.op = sop;
- op.u.shadow_control.dirty_bitmap = dirty_bitmap;
+ SET_XEN_GUEST_HANDLE(op.u.shadow_control.dirty_bitmap, dirty_bitmap);
op.u.shadow_control.pages = pages;
rc = do_dom0_op(xc_handle, &op);
{
int err;
struct xen_memory_reservation reservation = {
- .extent_start = extent_start, /* may be NULL */
.nr_extents = nr_extents,
.extent_order = extent_order,
.address_bits = address_bits,
.domid = domid
};
+ /* may be NULL */
+ SET_XEN_GUEST_HANDLE(reservation.extent_start, extent_start);
+
err = xc_memory_op(xc_handle, XENMEM_increase_reservation, &reservation);
if ( err == nr_extents )
return 0;
{
int err;
struct xen_memory_reservation reservation = {
- .extent_start = extent_start,
.nr_extents = nr_extents,
.extent_order = extent_order,
.address_bits = 0,
.domid = domid
};
+ SET_XEN_GUEST_HANDLE(reservation.extent_start, extent_start);
+
if ( extent_start == NULL )
{
fprintf(stderr,"decrease_reservation extent_start is NULL!\n");
{
int err;
struct xen_memory_reservation reservation = {
- .extent_start = extent_start,
.nr_extents = nr_extents,
.extent_order = extent_order,
.address_bits = address_bits,
.domid = domid
};
+ SET_XEN_GUEST_HANDLE(reservation.extent_start, extent_start);
err = xc_memory_op(xc_handle, XENMEM_populate_physmap, &reservation);
if ( err == nr_extents )
struct xen_translate_gpfn_list op = {
.domid = domid,
.nr_gpfns = nr_gpfns,
- .gpfn_list = gpfn_list,
- .mfn_list = mfn_list
};
+ SET_XEN_GUEST_HANDLE(op.gpfn_list, gpfn_list);
+ SET_XEN_GUEST_HANDLE(op.mfn_list, mfn_list);
return xc_memory_op(xc_handle, XENMEM_translate_gpfn_list, &op);
}
op.cmd = DOM0_SETVCPUCONTEXT;
op.u.setvcpucontext.domain = domid;
op.u.setvcpucontext.vcpu = vcpu;
- op.u.setvcpucontext.ctxt = ctxt;
+ SET_XEN_GUEST_HANDLE(op.u.setvcpucontext.ctxt, ctxt);
if ( (rc = mlock(ctxt, sizeof(*ctxt))) != 0 )
return rc;
launch_op.u.setvcpucontext.domain = (domid_t)domid;
launch_op.u.setvcpucontext.vcpu = 0;
- launch_op.u.setvcpucontext.ctxt = ctxt;
+ SET_XEN_GUEST_HANDLE(launch_op.u.setvcpucontext.ctxt, ctxt);
launch_op.cmd = DOM0_SETVCPUCONTEXT;
rc = xc_dom0_op(xc_handle, &launch_op);
launch_op.u.setvcpucontext.domain = (domid_t)domid;
launch_op.u.setvcpucontext.vcpu = 0;
- launch_op.u.setvcpucontext.ctxt = ctxt;
+ SET_XEN_GUEST_HANDLE(launch_op.u.setvcpucontext.ctxt, ctxt);
launch_op.cmd = DOM0_SETVCPUCONTEXT;
rc = xc_dom0_op(xc_handle, &launch_op);
if (count > 0) {
struct xen_memory_reservation reservation = {
- .extent_start = pfntab,
.nr_extents = count,
.extent_order = 0,
.domid = dom
};
+ SET_XEN_GUEST_HANDLE(reservation.extent_start, pfntab);
if ((rc = xc_memory_op(xc_handle, XENMEM_decrease_reservation,
&reservation)) != count) {
op.cmd = DOM0_SETVCPUCONTEXT;
op.u.setvcpucontext.domain = (domid_t)dom;
op.u.setvcpucontext.vcpu = 0;
- op.u.setvcpucontext.ctxt = &ctxt;
+ SET_XEN_GUEST_HANDLE(op.u.setvcpucontext.ctxt, &ctxt);
rc = xc_dom0_op(xc_handle, &op);
if (rc != 0) {
privcmd_mmap_entry_t *entries;
unsigned long m2p_chunks, m2p_size;
unsigned long *m2p;
+ unsigned long *extent_start;
int i, rc;
m2p_size = M2P_SIZE(max_mfn);
m2p_chunks = M2P_CHUNKS(max_mfn);
xmml.max_extents = m2p_chunks;
- if (!(xmml.extent_start = malloc(m2p_chunks * sizeof(unsigned long)))) {
+ if (!(extent_start = malloc(m2p_chunks * sizeof(unsigned long)))) {
ERR("failed to allocate space for m2p mfns");
return NULL;
}
+ SET_XEN_GUEST_HANDLE(xmml.extent_start, extent_start);
if (xc_memory_op(xc_handle, XENMEM_machphys_mfn_list, &xmml) ||
(xmml.nr_extents != m2p_chunks)) {
for (i=0; i < m2p_chunks; i++) {
entries[i].va = (unsigned long)(((void *)m2p) + (i * M2P_CHUNK_SIZE));
- entries[i].mfn = xmml.extent_start[i];
+ entries[i].mfn = extent_start[i];
entries[i].npages = M2P_CHUNK_SIZE >> PAGE_SHIFT;
}
return NULL;
}
- free(xmml.extent_start);
+ free(extent_start);
free(entries);
return m2p;
unsigned int nr_chars = *pnr_chars;
op.cmd = DOM0_READCONSOLE;
- op.u.readconsole.buffer = buffer;
+ SET_XEN_GUEST_HANDLE(op.u.readconsole.buffer, buffer);
op.u.readconsole.count = nr_chars;
op.u.readconsole.clear = clear;
return ret;
if ( (ret = do_dom0_op(xc_handle, &op)) == 0 )
- {
- *pbuffer = op.u.readconsole.buffer;
*pnr_chars = op.u.readconsole.count;
- }
safe_munlock(buffer, nr_chars);
op.cmd = DOM0_PERFCCONTROL;
op.u.perfccontrol.op = opcode;
- op.u.perfccontrol.desc = desc;
+ SET_XEN_GUEST_HANDLE(op.u.perfccontrol.desc, desc);
rc = do_dom0_op(xc_handle, &op);
op.cmd = DOM0_GETPAGEFRAMEINFO2;
op.u.getpageframeinfo2.domain = (domid_t)dom;
op.u.getpageframeinfo2.num = num;
- op.u.getpageframeinfo2.array = arr;
+ SET_XEN_GUEST_HANDLE(op.u.getpageframeinfo2.array, arr);
return do_dom0_op(xc_handle, &op);
}
struct xen_memory_reservation *reservation = arg;
struct xen_machphys_mfn_list *xmml = arg;
struct xen_translate_gpfn_list *trans = arg;
+ unsigned long *extent_start;
+ unsigned long *gpfn_list;
+ unsigned long *mfn_list;
long ret = -EINVAL;
hypercall.op = __HYPERVISOR_memory_op;
PERROR("Could not mlock");
goto out1;
}
- if ( (reservation->extent_start != NULL) &&
- (mlock(reservation->extent_start,
+ GET_XEN_GUEST_HANDLE(extent_start, reservation->extent_start);
+ if ( (extent_start != NULL) &&
+ (mlock(extent_start,
reservation->nr_extents * sizeof(unsigned long)) != 0) )
{
PERROR("Could not mlock");
PERROR("Could not mlock");
goto out1;
}
- if ( mlock(xmml->extent_start,
+ GET_XEN_GUEST_HANDLE(extent_start, xmml->extent_start);
+ if ( mlock(extent_start,
xmml->max_extents * sizeof(unsigned long)) != 0 )
{
PERROR("Could not mlock");
PERROR("Could not mlock");
goto out1;
}
- if ( mlock(trans->gpfn_list, trans->nr_gpfns * sizeof(long)) != 0 )
+ GET_XEN_GUEST_HANDLE(gpfn_list, trans->gpfn_list);
+ if ( mlock(gpfn_list, trans->nr_gpfns * sizeof(long)) != 0 )
{
PERROR("Could not mlock");
safe_munlock(trans, sizeof(*trans));
goto out1;
}
- if ( mlock(trans->mfn_list, trans->nr_gpfns * sizeof(long)) != 0 )
+ GET_XEN_GUEST_HANDLE(mfn_list, trans->mfn_list);
+ if ( mlock(mfn_list, trans->nr_gpfns * sizeof(long)) != 0 )
{
PERROR("Could not mlock");
- safe_munlock(trans->gpfn_list, trans->nr_gpfns * sizeof(long));
+ safe_munlock(gpfn_list, trans->nr_gpfns * sizeof(long));
safe_munlock(trans, sizeof(*trans));
goto out1;
}
case XENMEM_decrease_reservation:
case XENMEM_populate_physmap:
safe_munlock(reservation, sizeof(*reservation));
- if ( reservation->extent_start != NULL )
- safe_munlock(reservation->extent_start,
+ GET_XEN_GUEST_HANDLE(extent_start, reservation->extent_start);
+ if ( extent_start != NULL )
+ safe_munlock(extent_start,
reservation->nr_extents * sizeof(unsigned long));
break;
case XENMEM_machphys_mfn_list:
safe_munlock(xmml, sizeof(*xmml));
- safe_munlock(xmml->extent_start,
+ GET_XEN_GUEST_HANDLE(extent_start, xmml->extent_start);
+ safe_munlock(extent_start,
xmml->max_extents * sizeof(unsigned long));
break;
case XENMEM_add_to_physmap:
safe_munlock(arg, sizeof(struct xen_add_to_physmap));
break;
case XENMEM_translate_gpfn_list:
- safe_munlock(trans->mfn_list, trans->nr_gpfns * sizeof(long));
- safe_munlock(trans->gpfn_list, trans->nr_gpfns * sizeof(long));
+ GET_XEN_GUEST_HANDLE(mfn_list, trans->mfn_list);
+ safe_munlock(mfn_list, trans->nr_gpfns * sizeof(long));
+ GET_XEN_GUEST_HANDLE(gpfn_list, trans->gpfn_list);
+ safe_munlock(gpfn_list, trans->nr_gpfns * sizeof(long));
safe_munlock(trans, sizeof(*trans));
break;
}
op.cmd = DOM0_GETMEMLIST;
op.u.getmemlist.domain = (domid_t)domid;
op.u.getmemlist.max_pfns = max_pfns;
- op.u.getmemlist.buffer = pfn_buf;
+ SET_XEN_GUEST_HANDLE(op.u.getmemlist.buffer, pfn_buf);
#ifdef VALGRIND
memset(pfn_buf, 0, max_pfns * sizeof(unsigned long));
dom0_op_t op;
op.u.getdomaininfolist.first_domain = first_domain;
op.u.getdomaininfolist.max_domains = max_domains;
- op.u.getdomaininfolist.buffer = info;
+ SET_XEN_GUEST_HANDLE(op.u.getdomaininfolist.buffer, info);
if (mlock( info, max_domains * sizeof(dom0_getdomaininfo_t)) < 0) {
perror("Failed to mlock domaininfo array");
#ifndef __HYPERVISOR_IF_IA64_H__
#define __HYPERVISOR_IF_IA64_H__
-#ifdef __XEN__
#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
typedef struct { type *p; } __guest_handle_ ## name
-#else
-#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
- typedef type * __guest_handle_ ## name
-#endif
-#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
-#define XEN_GUEST_HANDLE(name) __guest_handle_ ## name
+#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
+#define XEN_GUEST_HANDLE(name) __guest_handle_ ## name
+#define SET_XEN_GUEST_HANDLE(hnd, val) do { (hnd).p = val; } while (0)
+#define GET_XEN_GUEST_HANDLE(val, hnd) do { val = (hnd).p; } while (0)
#ifndef __ASSEMBLY__
/* Guest handles for primitive C types. */
#ifndef __XEN_PUBLIC_ARCH_X86_32_H__
#define __XEN_PUBLIC_ARCH_X86_32_H__
-#ifdef __XEN__
#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
typedef struct { type *p; } __guest_handle_ ## name
-#else
-#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
- typedef type * __guest_handle_ ## name
-#endif
-#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
-#define XEN_GUEST_HANDLE(name) __guest_handle_ ## name
+#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
+#define XEN_GUEST_HANDLE(name) __guest_handle_ ## name
+#define SET_XEN_GUEST_HANDLE(hnd, val) do { (hnd).p = val; } while (0)
+#define GET_XEN_GUEST_HANDLE(val, hnd) do { val = (hnd).p; } while (0)
#ifndef __ASSEMBLY__
/* Guest handles for primitive C types. */
#ifndef __XEN_PUBLIC_ARCH_X86_64_H__
#define __XEN_PUBLIC_ARCH_X86_64_H__
-#ifdef __XEN__
#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
typedef struct { type *p; } __guest_handle_ ## name
-#else
-#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
- typedef type * __guest_handle_ ## name
-#endif
-#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
-#define XEN_GUEST_HANDLE(name) __guest_handle_ ## name
+#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
+#define XEN_GUEST_HANDLE(name) __guest_handle_ ## name
+#define SET_XEN_GUEST_HANDLE(hnd, val) do { (hnd).p = val; } while (0)
+#define GET_XEN_GUEST_HANDLE(val, hnd) do { val = (hnd).p; } while (0)
#ifndef __ASSEMBLY__
/* Guest handles for primitive C types. */